{
ASSERT(frame_table[gmfn].count_info & PGC_page_table);
- if ( shadow_max_pgtable_type(d, gpfn) == PGT_none )
+ if ( shadow_max_pgtable_type(d, gpfn, NULL) == PGT_none )
{
clear_bit(_PGC_page_table, &frame_table[gmfn].count_info);
break;
}
+ // Don't add a new shadow of something that is already out-of-sync
+ // (i.e. already has a snapshot) -- unless the entry being added is
+ // the snapshot itself.
+ //
+ ASSERT( (psh_type == PGT_snapshot) || !mfn_out_of_sync(gmfn) );
+
set_shadow_status(d, gpfn, gmfn, smfn, psh_type);
if ( pin )
shadow_make_snapshot(
struct domain *d, unsigned long gpfn, unsigned long gmfn)
{
- unsigned long smfn;
+ unsigned long smfn, sl1mfn;
void *original, *snapshot;
+ u32 min_max = 0;
+ int min, max, length;
if ( test_and_set_bit(_PGC_out_of_sync, &frame_table[gmfn].count_info) )
{
if ( !get_shadow_ref(smfn) )
BUG();
+ if ( shadow_max_pgtable_type(d, gpfn, &sl1mfn) == PGT_l1_shadow )
+ min_max = pfn_to_page(sl1mfn)->tlbflush_timestamp;
+ pfn_to_page(smfn)->tlbflush_timestamp = min_max;
+
+ min = SHADOW_MIN(min_max);
+ max = SHADOW_MAX(min_max);
+ length = max - min + 1;
+ perfc_incr_histo(snapshot_copies, length, PT_UPDATES);
+
+ min *= sizeof(l1_pgentry_t);
+ length *= sizeof(l1_pgentry_t);
+
original = map_domain_mem(gmfn << PAGE_SHIFT);
snapshot = map_domain_mem(smfn << PAGE_SHIFT);
- memcpy(snapshot, original, PAGE_SIZE);
+ memcpy(snapshot + min, original + min, length);
unmap_domain_mem(original);
unmap_domain_mem(snapshot);
unsigned long *guest, *shadow, *snapshot;
int need_flush = 0, external = shadow_mode_external(d);
int unshadow;
- u32 min_max;
- int min, max;
+ int changed;
ASSERT(spin_is_locked(&d->arch.shadow_lock));
switch ( stype ) {
case PGT_l1_shadow:
- min_max = pfn_to_page(smfn)->tlbflush_timestamp;
- min = SHADOW_MIN(min_max);
- max = SHADOW_MAX(min_max);
- for ( i = min; i <= max; i++ )
+ {
+ u32 min_max_shadow = pfn_to_page(smfn)->tlbflush_timestamp;
+ int min_shadow = SHADOW_MIN(min_max_shadow);
+ int max_shadow = SHADOW_MAX(min_max_shadow);
+
+ u32 min_max_snapshot =
+ pfn_to_page(entry->snapshot_mfn)->tlbflush_timestamp;
+ int min_snapshot = SHADOW_MIN(min_max_snapshot);
+ int max_snapshot = SHADOW_MAX(min_max_snapshot);
+
+ changed = 0;
+
+ for ( i = min_shadow; i <= max_shadow; i++ )
{
- unsigned new_pte = guest[i];
- if ( new_pte != snapshot[i] )
+ if ( (i < min_snapshot) || (i > max_snapshot) ||
+ (guest[i] != snapshot[i]) )
{
- need_flush |= validate_pte_change(d, new_pte, &shadow[i]);
+ need_flush |= validate_pte_change(d, guest[i], &shadow[i]);
// can't update snapshots of linear page tables -- they
// are used multiple times...
//
// snapshot[i] = new_pte;
+
+ changed++;
}
}
+ perfc_incrc(resync_l1);
+ perfc_incr_histo(wpt_updates, changed, PT_UPDATES);
+ perfc_incr_histo(l1_entries_checked, max_shadow - min_shadow + 1, PT_UPDATES);
break;
+ }
case PGT_l2_shadow:
- max = -1;
+ {
+ int max = -1;
+
+ changed = 0;
for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
{
if ( !is_guest_l2_slot(i) && !external )
continue;
- unsigned new_pde = guest[i];
+ unsigned long new_pde = guest[i];
if ( new_pde != snapshot[i] )
{
need_flush |= validate_pde_change(d, new_pde, &shadow[i]);
// are used multiple times...
//
// snapshot[i] = new_pde;
+
+ changed++;
}
if ( new_pde != 0 )
max = i;
}
if ( max == -1 )
unshadow = 1;
+ perfc_incrc(resync_l2);
+ perfc_incr_histo(shm_l2_updates, changed, PT_UPDATES);
break;
- default:
+ }
+ case PGT_hl2_shadow:
+ changed = 0;
for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
{
if ( !is_guest_l2_slot(i) && !external )
continue;
- unsigned new_pde = guest[i];
+ unsigned long new_pde = guest[i];
if ( new_pde != snapshot[i] )
{
need_flush |= validate_hl2e_change(d, new_pde, &shadow[i]);
// are used multiple times...
//
// snapshot[i] = new_pde;
+
+ changed++;
}
}
+ perfc_incrc(resync_hl2);
+ perfc_incr_histo(shm_hl2_updates, changed, PT_UPDATES);
break;
+ default:
+ BUG();
}
unmap_domain_mem(shadow);
unmap_domain_mem(ppte);
}
- // XXX mafetter: SMP perf bug.
+ // XXX mafetter: SMP
//
// With the current algorithm, we've gotta flush all the TLBs
// before we can safely continue. I don't think we want to
// (any path from a PTE that grants write access to an out-of-sync
// page table page needs to be vcpu private).
//
- flush_tlb_all();
+#if 0 // should be enabled for SMP guests: flushes the TLB of every cpu except the current one...
+ flush_tlb_mask(((1 << smp_num_cpus) - 1) & ~(1 << smp_processor_id()));
+#endif
+ need_flush = 1;
// Second, resync all L1 pages, then L2 pages, etc...
//
need_flush |= resync_all(d, PGT_hl2_shadow);
need_flush |= resync_all(d, PGT_l2_shadow);
- if ( need_flush )
+ if ( need_flush && !unlikely(shadow_mode_external(d)) )
local_flush_tlb();
free_out_of_sync_state(d);
#define shadow_lock(_d) do { ASSERT(!spin_is_locked(&(_d)->arch.shadow_lock)); spin_lock(&(_d)->arch.shadow_lock); } while (0)
#define shadow_unlock(_d) spin_unlock(&(_d)->arch.shadow_lock)
-#define SHADOW_ENCODE_MIN_MAX(_min, _max) (((L1_PAGETABLE_ENTRIES - (_max)) << 16) | (_min))
+#define SHADOW_ENCODE_MIN_MAX(_min, _max) ((((L1_PAGETABLE_ENTRIES - 1) - (_max)) << 16) | (_min))
#define SHADOW_MIN(_encoded) ((_encoded) & ((1u<<16) - 1))
-#define SHADOW_MAX(_encoded) (L1_PAGETABLE_ENTRIES - ((_encoded) >> 16))
+#define SHADOW_MAX(_encoded) ((L1_PAGETABLE_ENTRIES - 1) - ((_encoded) >> 16))
extern void shadow_mode_init(void);
extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
if ( d->arch.out_of_sync && __shadow_out_of_sync(ed, va) )
{
+ perfc_incrc(shadow_sync_va);
+
// XXX - could be smarter
//
__shadow_sync_all(ed->domain);
* Either returns PGT_none, or PGT_l{1,2,3,4}_page_table.
*/
static inline unsigned long
-shadow_max_pgtable_type(struct domain *d, unsigned long gpfn)
+shadow_max_pgtable_type(struct domain *d, unsigned long gpfn,
+ unsigned long *smfn)
{
struct shadow_status *x;
unsigned long pttype = PGT_none, type;
ASSERT(spin_is_locked(&d->arch.shadow_lock));
ASSERT(gpfn == (gpfn & PGT_mfn_mask));
+ perfc_incrc(shadow_max_type);
+
x = hash_bucket(d, gpfn);
while ( x && x->gpfn_and_flags )
}
if ( type > pttype )
+ {
pttype = type;
+ if ( smfn )
+ *smfn = x->smfn;
+ }
}
next:
x = x->next;
#define PERFC_PT_UPDATES_BUCKET_SIZE 3
PERFCOUNTER_ARRAY( wpt_updates, "writable pt updates", PERFC_MAX_PT_UPDATES )
PERFCOUNTER_ARRAY( bpt_updates, "batched pt updates", PERFC_MAX_PT_UPDATES )
+PERFCOUNTER_ARRAY( l1_entries_checked, "l1 entries checked", PERFC_MAX_PT_UPDATES )
+PERFCOUNTER_ARRAY( shm_l2_updates, "shadow mode L2 pt updates", PERFC_MAX_PT_UPDATES )
+PERFCOUNTER_ARRAY( shm_hl2_updates, "shadow mode HL2 pt updates", PERFC_MAX_PT_UPDATES )
+PERFCOUNTER_ARRAY( snapshot_copies, "entries copied per snapshot", PERFC_MAX_PT_UPDATES )
PERFCOUNTER_ARRAY( hypercalls, "hypercalls", NR_hypercalls )
PERFCOUNTER_ARRAY( exceptions, "exceptions", 32 )
PERFCOUNTER_CPU( calls_to_update_va, "calls_to_update_va_map" )
PERFCOUNTER_CPU( page_faults, "page faults" )
PERFCOUNTER_CPU( copy_user_faults, "copy_user faults" )
+
+PERFCOUNTER_CPU(shadow_fault_calls, "calls to shadow_fault")
+PERFCOUNTER_CPU(shadow_fault_bail_pde_not_present, "sf bailed due to pde not present")
+PERFCOUNTER_CPU(shadow_fault_bail_pte_not_present, "sf bailed due to pte not present")
+PERFCOUNTER_CPU(shadow_fault_bail_ro_mapping, "sf bailed due to a ro mapping")
+PERFCOUNTER_CPU(shadow_fault_fixed, "sf fixed the pgfault")
+PERFCOUNTER_CPU(write_fault_bail, "sf bailed due to write_fault")
+PERFCOUNTER_CPU(read_fault_bail, "sf bailed due to read_fault")
+
PERFCOUNTER_CPU( map_domain_mem_count, "map_domain_mem count" )
-PERFCOUNTER_CPU( shadow_l2_table_count, "shadow_l2_table count" )
-PERFCOUNTER_CPU( shadow_l1_table_count, "shadow_l1_table count" )
-PERFCOUNTER_CPU( unshadow_table_count, "unshadow_table count" )
-PERFCOUNTER_CPU( shadow_fixup_count, "shadow_fixup count" )
+PERFCOUNTER_CPU( shadow_l2_table_count, "shadow_l2_table count" )
+PERFCOUNTER_CPU( shadow_l1_table_count, "shadow_l1_table count" )
+PERFCOUNTER_CPU( unshadow_table_count, "unshadow_table count" )
+PERFCOUNTER_CPU( shadow_fixup_count, "shadow_fixup count" )
PERFCOUNTER_CPU( shadow_update_va_fail1, "shadow_update_va_fail1" )
PERFCOUNTER_CPU( shadow_update_va_fail2, "shadow_update_va_fail2" )
PERFCOUNTER_CPU(shadow_status_calls, "calls to ___shadow_status" )
PERFCOUNTER_CPU(shadow_status_miss, "missed shadow cache" )
PERFCOUNTER_CPU(shadow_status_hit_head, "hits on head of bucket" )
+PERFCOUNTER_CPU(shadow_max_type, "calls to shadow_max_type" )
PERFCOUNTER_CPU(shadow_sync_all, "calls to shadow_sync_all")
+PERFCOUNTER_CPU(shadow_sync_va, "calls to shadow_sync_va")
+PERFCOUNTER_CPU(resync_l1, "resync L1 page")
+PERFCOUNTER_CPU(resync_l2, "resync L2 page")
+PERFCOUNTER_CPU(resync_hl2, "resync HL2 page")
PERFCOUNTER_CPU(shadow_make_snapshot, "snapshots created")
PERFCOUNTER_CPU(shadow_mark_mfn_out_of_sync_calls, "calls to shadow_mk_out_of_sync")
PERFCOUNTER_CPU(shadow_out_of_sync_calls, "calls to shadow_out_of_sync")
PERFCOUNTER_CPU(snapshot_entry_matches_calls, "calls to ss_entry_matches")
PERFCOUNTER_CPU(snapshot_entry_matches_true, "ss_entry_matches returns true")
-PERFCOUNTER_CPU(shadow_fault_calls, "calls to shadow_fault")
-PERFCOUNTER_CPU(shadow_fault_bail_pde_not_present, "sf bailed due to pde not present")
-PERFCOUNTER_CPU(shadow_fault_bail_pte_not_present, "sf bailed due to pte not present")
-PERFCOUNTER_CPU(shadow_fault_bail_ro_mapping, "sf bailed due to a ro mapping")
-PERFCOUNTER_CPU(shadow_fault_fixed, "sf fixed the pgfault")
PERFCOUNTER_CPU(validate_pte_calls, "calls to validate_pte_change")
PERFCOUNTER_CPU(validate_pte_changes, "validate_pte makes changes")
PERFCOUNTER_CPU(validate_pde_calls, "calls to validate_pde_change")
PERFCOUNTER_CPU(remove_write_fast_exit, "remove_write hit predicted entry")
PERFCOUNTER_CPU(remove_write_predicted, "remove_write predict hit&exit")
PERFCOUNTER_CPU(remove_write_bad_prediction, "remove_write bad prediction")
-PERFCOUNTER_CPU(write_fault_bail, "sf bailed due to write_fault")
-PERFCOUNTER_CPU(read_fault_bail, "sf bailed due to read_fault")
-PERFCOUNTER_CPU(update_hl2e_invlpg, "update_hl2e calls invlpg")
+PERFCOUNTER_CPU(update_hl2e_invlpg, "update_hl2e calls invlpg")